path: root/drivers/media/video/ivtv/ivtv-irq.c
author	Hans Verkuil <hverkuil@xs4all.nl>	2007-07-28 18:45:50 -0400
committer	Mauro Carvalho Chehab <mchehab@infradead.org>	2007-10-09 21:05:32 -0400
commit	37093b1ea600d84fbf7252baf12eedec85ae40f1 (patch)
tree	ba78b73933c0d7b8989831c49a86f16c26f99b04 /drivers/media/video/ivtv/ivtv-irq.c
parent	f4071b85ea0ca3bd06f63c330562b4cfdffa8473 (diff)
V4L/DVB (6047): ivtv: Fix scatter/gather DMA timeouts
It turns out that the cx23415/6 DMA engine cannot do scatter/gather DMA
reliably. Every so often, depending on the phase of the moon and your
hardware configuration, the cx2341x DMA engine simply chokes on it and you
have to reboot to get it working again.

This change replaces the scatter/gather DMA with single transfers, one
buffer at a time, where the driver is now responsible for DMA-ing each
buffer. UDMA is still done using scatter/gather DMA; that will be fixed
soon.

Many thanks to Mark Bryars <mark.bryars@etvinteractive.com> for discovering
the link between scatter/gather and the DMA timeouts.

Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@infradead.org>
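[Editor's note: to make the new transfer scheme easier to follow, here is a
minimal sketch of the pattern this patch adopts, not the driver code itself.
The pending SG list is snapshotted into a processing list, the engine is
handed exactly one element per transfer, and the completion interrupt either
kicks the next element or restarts the whole request on error. All names
here (sg_element, xfer_state, program_one_element, dma_start,
dma_complete_irq) are illustrative stand-ins for the patch's
sg_pending/sg_processing/sg_processed bookkeeping.]

#include <stdint.h>
#include <string.h>

#define MAX_SG 64

struct sg_element {
	uint32_t src, dst, size;
};

struct xfer_state {
	struct sg_element pending[MAX_SG];    /* built while buffers are queued */
	struct sg_element processing[MAX_SG]; /* snapshot currently being transferred */
	int pending_size, processing_size, processed;
	int retries;
};

/* Stand-in for the register writes that hand a single element to the
   engine (ivtv_dma_enc_start_xfer()/ivtv_dma_dec_start_xfer() in the patch). */
static void program_one_element(const struct sg_element *e)
{
	(void)e; /* would write src/dst/size and set the DMA-go bit */
}

/* Start a request: snapshot the pending list, then kick element 0. */
static void dma_start(struct xfer_state *x)
{
	if (x->pending_size == 0)
		return;
	memcpy(x->processing, x->pending,
	       sizeof(x->pending[0]) * x->pending_size);
	x->processing_size = x->pending_size;
	x->pending_size = 0;
	x->processed = 0;
	x->retries = 0;
	program_one_element(&x->processing[x->processed++]);
}

/* DMA-complete interrupt: on error, retry the whole request from the
   first segment (retrying only the failing segment is not sufficient);
   otherwise kick the next element, or finish when none remain. */
static void dma_complete_irq(struct xfer_state *x, int error)
{
	if (error && x->retries++ < 3)
		x->processed = 0;  /* restart from segment 0 */
	if (x->processed < x->processing_size) {
		program_one_element(&x->processing[x->processed++]);
		return;            /* more segments to go */
	}
	x->processing_size = 0;    /* request done: release buffers, wake waiters */
}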
Diffstat (limited to 'drivers/media/video/ivtv/ivtv-irq.c')
-rw-r--r--	drivers/media/video/ivtv/ivtv-irq.c	234
1 file changed, 141 insertions(+), 93 deletions(-)
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 8644f3dda31e..9695e5356163 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -60,18 +60,18 @@ static void ivtv_pio_work_handler(struct ivtv *itv)
 	buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
 	list_for_each(p, &s->q_dma.list) {
 		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
-		u32 size = s->PIOarray[i].size & 0x3ffff;
+		u32 size = s->sg_processing[i].size & 0x3ffff;
 
 		/* Copy the data from the card to the buffer */
 		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
-			memcpy_fromio(buf->buf, itv->dec_mem + s->PIOarray[i].src - IVTV_DECODER_OFFSET, size);
+			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
 		}
 		else {
-			memcpy_fromio(buf->buf, itv->enc_mem + s->PIOarray[i].src, size);
+			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
 		}
-		if (s->PIOarray[i].size & 0x80000000)
-			break;
 		i++;
+		if (i == s->sg_processing_size)
+			break;
 	}
 	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
 }
@@ -105,7 +105,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 	u32 offset, size;
 	u32 UVoffset = 0, UVsize = 0;
 	int skip_bufs = s->q_predma.buffers;
-	int idx = s->SG_length;
+	int idx = s->sg_pending_size;
 	int rc;
 
 	/* sanity checks */
@@ -123,7 +123,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 	case IVTV_ENC_STREAM_TYPE_MPG:
 		offset = data[1];
 		size = data[2];
-		s->dma_pts = 0;
+		s->pending_pts = 0;
 		break;
 
 	case IVTV_ENC_STREAM_TYPE_YUV:
@@ -131,13 +131,13 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 		size = data[2];
 		UVoffset = data[3];
 		UVsize = data[4];
-		s->dma_pts = ((u64) data[5] << 32) | data[6];
+		s->pending_pts = ((u64) data[5] << 32) | data[6];
 		break;
 
 	case IVTV_ENC_STREAM_TYPE_PCM:
 		offset = data[1] + 12;
 		size = data[2] - 12;
-		s->dma_pts = read_dec(offset - 8) |
+		s->pending_pts = read_dec(offset - 8) |
 			((u64)(read_dec(offset - 12)) << 32);
 		if (itv->has_cx23415)
 			offset += IVTV_DECODER_OFFSET;
@@ -150,13 +150,13 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 			IVTV_DEBUG_INFO("VBI offset == 0\n");
 			return -1;
 		}
-		s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
+		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
 		break;
 
 	case IVTV_DEC_STREAM_TYPE_VBI:
 		size = read_dec(itv->vbi.dec_start + 4) + 8;
 		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
-		s->dma_pts = 0;
+		s->pending_pts = 0;
 		offset += IVTV_DECODER_OFFSET;
 		break;
 	default:
@@ -165,17 +165,17 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 	}
 
 	/* if this is the start of the DMA then fill in the magic cookie */
-	if (s->SG_length == 0) {
+	if (s->sg_pending_size == 0) {
 		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
 		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
-			s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET);
+			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
 			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
 		}
 		else {
-			s->dma_backup = read_enc(offset);
+			s->pending_backup = read_enc(offset);
 			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
 		}
-		s->dma_offset = offset;
+		s->pending_offset = offset;
 	}
 
 	bytes_needed = size;
@@ -202,7 +202,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 	}
 	s->buffers_stolen = rc;
 
-	/* got the buffers, now fill in SGarray (DMA) */
+	/* got the buffers, now fill in sg_pending */
 	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
 	memset(buf->buf, 0, 128);
 	list_for_each(p, &s->q_predma.list) {
@@ -210,9 +210,9 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 
 		if (skip_bufs-- > 0)
 			continue;
-		s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
-		s->SGarray[idx].src = cpu_to_le32(offset);
-		s->SGarray[idx].size = cpu_to_le32(s->buf_size);
+		s->sg_pending[idx].dst = buf->dma_handle;
+		s->sg_pending[idx].src = offset;
+		s->sg_pending[idx].size = s->buf_size;
 		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
 		buf->dma_xfer_cnt = s->dma_xfer_cnt;
 
@@ -230,7 +230,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 		}
 		idx++;
 	}
-	s->SG_length = idx;
+	s->sg_pending_size = idx;
 	return 0;
 }
 
@@ -332,9 +332,9 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
 			offset = uv_offset;
 			y_done = 1;
 		}
-		s->SGarray[idx].src = cpu_to_le32(buf->dma_handle);
-		s->SGarray[idx].dst = cpu_to_le32(offset);
-		s->SGarray[idx].size = cpu_to_le32(buf->bytesused);
+		s->sg_pending[idx].src = buf->dma_handle;
+		s->sg_pending[idx].dst = offset;
+		s->sg_pending[idx].size = buf->bytesused;
 
 		offset += buf->bytesused;
 		bytes_written += buf->bytesused;
@@ -343,10 +343,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
 		ivtv_buf_sync_for_device(s, buf);
 		idx++;
 	}
-	s->SG_length = idx;
-
-	/* Mark last buffer size for Interrupt flag */
-	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
+	s->sg_pending_size = idx;
 
 	/* Sync Hardware SG List of buffers */
 	ivtv_stream_sync_for_device(s);
@@ -362,6 +359,34 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
 	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
 }
 
+static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
+{
+	struct ivtv *itv = s->itv;
+
+	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
+	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
+	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
+	s->sg_processed++;
+	/* Sync Hardware SG List of buffers */
+	ivtv_stream_sync_for_device(s);
+	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
+	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
+}
+
+static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
+{
+	struct ivtv *itv = s->itv;
+
+	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
+	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
+	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
+	s->sg_processed++;
+	/* Sync Hardware SG List of buffers */
+	ivtv_stream_sync_for_device(s);
+	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
+	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
+}
+
 /* start the encoder DMA */
 static void ivtv_dma_enc_start(struct ivtv_stream *s)
 {
@@ -375,8 +400,7 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
 	ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
 
 	if (ivtv_use_dma(s))
-		s->SGarray[s->SG_length - 1].size =
-			cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);
+		s->sg_pending[s->sg_pending_size - 1].size += 256;
 
 	/* If this is an MPEG stream, and VBI data is also pending, then append the
 	   VBI DMA to the MPEG DMA and transfer both sets of data at once.
@@ -387,45 +411,39 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
 	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
 	   use. This way no conflicts occur. */
 	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
-	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
-	    s->SG_length + s_vbi->SG_length <= s->buffers) {
+	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
+	    s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
 		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
 		if (ivtv_use_dma(s_vbi))
-			s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s->SG_length - 1].size) + 256);
-		for (i = 0; i < s_vbi->SG_length; i++) {
-			s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
+			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
+		for (i = 0; i < s_vbi->sg_pending_size; i++) {
+			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
 		}
-		itv->vbi.dma_offset = s_vbi->dma_offset;
-		s_vbi->SG_length = 0;
+		s_vbi->dma_offset = s_vbi->pending_offset;
+		s_vbi->sg_pending_size = 0;
 		s_vbi->dma_xfer_cnt++;
 		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
 		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
 	}
 
-	/* Mark last buffer size for Interrupt flag */
-	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
 	s->dma_xfer_cnt++;
-
-	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
-		set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
-	else
-		clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
+	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
+	s->sg_processing_size = s->sg_pending_size;
+	s->sg_pending_size = 0;
+	s->sg_processed = 0;
+	s->dma_offset = s->pending_offset;
+	s->dma_backup = s->pending_backup;
+	s->dma_pts = s->pending_pts;
 
 	if (ivtv_use_pio(s)) {
-		for (i = 0; i < s->SG_length; i++) {
-			s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
-			s->PIOarray[i].size = le32_to_cpu(s->SGarray[i].size);
-		}
 		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
 		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
 		set_bit(IVTV_F_I_PIO, &itv->i_flags);
 		itv->cur_pio_stream = s->type;
 	}
 	else {
-		/* Sync Hardware SG List of buffers */
-		ivtv_stream_sync_for_device(s);
-		write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
-		write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
+		itv->dma_retries = 0;
+		ivtv_dma_enc_start_xfer(s);
 		set_bit(IVTV_F_I_DMA, &itv->i_flags);
 		itv->cur_dma_stream = s->type;
 		itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
@@ -439,10 +457,15 @@ static void ivtv_dma_dec_start(struct ivtv_stream *s)
 
 	if (s->q_predma.bytesused)
 		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
+	s->dma_xfer_cnt++;
+	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
+	s->sg_processing_size = s->sg_pending_size;
+	s->sg_pending_size = 0;
+	s->sg_processed = 0;
+
 	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
-	/* put SG Handle into register 0x0c */
-	write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
-	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
+	itv->dma_retries = 0;
+	ivtv_dma_dec_start_xfer(s);
 	set_bit(IVTV_F_I_DMA, &itv->i_flags);
 	itv->cur_dma_stream = s->type;
 	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
@@ -453,27 +476,42 @@ static void ivtv_irq_dma_read(struct ivtv *itv)
 {
 	struct ivtv_stream *s = NULL;
 	struct ivtv_buffer *buf;
-	int hw_stream_type;
+	int hw_stream_type = 0;
 
 	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
-	del_timer(&itv->dma_timer);
-	if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
-		IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
-		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
+	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
+		del_timer(&itv->dma_timer);
+		return;
 	}
+
 	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
-		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
-			s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
-			hw_stream_type = 2;
+		s = &itv->streams[itv->cur_dma_stream];
+		ivtv_stream_sync_for_cpu(s);
+
+		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
+			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
+					read_reg(IVTV_REG_DMASTATUS),
+					s->sg_processed, s->sg_processing_size, itv->dma_retries);
+			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
+			if (itv->dma_retries == 3) {
+				itv->dma_retries = 0;
+			}
+			else {
+				/* Retry, starting with the first xfer segment.
+				   Just retrying the current segment is not sufficient. */
+				s->sg_processed = 0;
+				itv->dma_retries++;
+			}
 		}
-		else {
-			s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
-			hw_stream_type = 0;
+		if (s->sg_processed < s->sg_processing_size) {
+			/* DMA next buffer */
+			ivtv_dma_dec_start_xfer(s);
+			return;
 		}
+		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
+			hw_stream_type = 2;
 		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
 
-		ivtv_stream_sync_for_cpu(s);
-
 		/* For some reason must kick the firmware, like PIO mode,
 		   I think this tells the firmware we are done and the size
 		   of the xfer so it can calculate what we need next.
@@ -490,6 +528,7 @@ static void ivtv_irq_dma_read(struct ivtv *itv)
 		}
 		wake_up(&s->waitq);
 	}
+	del_timer(&itv->dma_timer);
 	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
 	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
 	itv->cur_dma_stream = -1;
@@ -501,33 +540,44 @@ static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
 	u32 data[CX2341X_MBOX_MAX_DATA];
 	struct ivtv_stream *s;
 
-	del_timer(&itv->dma_timer);
 	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
-	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
-	if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
-		data[1] = 3;
-	else if (data[1] > 2)
+	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
+	if (itv->cur_dma_stream < 0) {
+		del_timer(&itv->dma_timer);
 		return;
-	s = &itv->streams[ivtv_stream_map[data[1]]];
+	}
+	s = &itv->streams[itv->cur_dma_stream];
+	ivtv_stream_sync_for_cpu(s);
+
 	if (data[0] & 0x18) {
-		IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]);
+		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
+				s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
 		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
-		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]);
+		if (itv->dma_retries == 3) {
+			itv->dma_retries = 0;
+		}
+		else {
+			/* Retry, starting with the first xfer segment.
+			   Just retrying the current segment is not sufficient. */
+			s->sg_processed = 0;
+			itv->dma_retries++;
+		}
 	}
-	s->SG_length = 0;
+	if (s->sg_processed < s->sg_processing_size) {
+		/* DMA next buffer */
+		ivtv_dma_enc_start_xfer(s);
+		return;
+	}
+	del_timer(&itv->dma_timer);
 	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
 	itv->cur_dma_stream = -1;
 	dma_post(s);
-	ivtv_stream_sync_for_cpu(s);
 	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
-		u32 tmp;
-
 		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
-		tmp = s->dma_offset;
-		s->dma_offset = itv->vbi.dma_offset;
 		dma_post(s);
-		s->dma_offset = tmp;
 	}
+	s->sg_processing_size = 0;
+	s->sg_processed = 0;
 	wake_up(&itv->dma_waitq);
 }
 
@@ -541,8 +591,7 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
 	}
 	s = &itv->streams[itv->cur_pio_stream];
 	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
-	s->SG_length = 0;
-	clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
+	s->sg_pending_size = 0;
 	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
 	itv->cur_pio_stream = -1;
 	dma_post(s);
@@ -554,13 +603,8 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
 		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
 	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
 	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
-		u32 tmp;
-
 		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
-		tmp = s->dma_offset;
-		s->dma_offset = itv->vbi.dma_offset;
 		dma_post(s);
-		s->dma_offset = tmp;
 	}
 	wake_up(&itv->dma_waitq);
 }
@@ -572,19 +616,23 @@ static void ivtv_irq_dma_err(struct ivtv *itv)
 	del_timer(&itv->dma_timer);
 	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
 	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
 				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
+	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
 	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
 	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
 		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
 
 		/* retry */
-		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
 		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
 			ivtv_dma_dec_start(s);
 		else
 			ivtv_dma_enc_start(s);
 		return;
 	}
+	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
+		ivtv_udma_start(itv);
+		return;
+	}
 	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
 	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
 	itv->cur_dma_stream = -1;
@@ -628,14 +676,14 @@ static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
 	   DMA the data. Since at most four VBI DMA buffers are available,
 	   we just drop the old requests when there are already three
 	   requests queued. */
-	if (s->SG_length > 2) {
+	if (s->sg_pending_size > 2) {
 		struct list_head *p;
 		list_for_each(p, &s->q_predma.list) {
 			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
 			ivtv_buf_sync_for_cpu(s, buf);
 		}
 		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
-		s->SG_length = 0;
+		s->sg_pending_size = 0;
 	}
 	/* if we can append the data, and the MPEG stream isn't capturing,
 	   then start a DMA request for just the VBI data. */