author		Hans Verkuil <hverkuil@xs4all.nl>	2007-07-28 18:45:50 -0400
committer	Mauro Carvalho Chehab <mchehab@infradead.org>	2007-10-09 21:05:32 -0400
commit		37093b1ea600d84fbf7252baf12eedec85ae40f1 (patch)
tree		ba78b73933c0d7b8989831c49a86f16c26f99b04 /drivers
parent		f4071b85ea0ca3bd06f63c330562b4cfdffa8473 (diff)
V4L/DVB (6047): ivtv: Fix scatter/gather DMA timeouts
It turns out that the cx23415/6 DMA engine cannot do scatter/gather DMA reliably. Every so often, depending on the phase of the moon and your hardware configuration, the cx2341x DMA engine simply chokes on it and you have to reboot to get it working again.

This change replaces the scatter/gather DMA with single transfers, one buffer at a time; the driver is now responsible for DMA-ing each buffer. UDMA is still done using scatter/gather DMA; that will be fixed soon.

Many thanks to Mark Bryars <mark.bryars@etvinteractive.com> for discovering the link between scatter/gather and the DMA timeouts.

Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@infradead.org>
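The gist of the new scheme, stripped of the driver specifics shown in the diff below, is a small state machine: the pending SG list is copied to a "processing" list when a transfer starts, the hardware is only ever handed one element at a time, and the completion interrupt either starts the next element, restarts from the first element on error (up to 3 retries), or finishes the transfer. The following standalone C sketch illustrates that flow under those assumptions; all names (sg_pending, start_xfer, dma_complete, ...) are illustrative stand-ins rather than the real ivtv symbols, and the hardware access is stubbed out with printf.

/*
 * Standalone sketch (not driver code) of the per-buffer transfer scheme
 * described above.
 */
#include <stdio.h>
#include <string.h>

struct sg_element { unsigned int src, dst, size; };

#define MAX_SG 8

static struct sg_element sg_pending[MAX_SG], sg_processing[MAX_SG];
static int sg_pending_size, sg_processing_size, sg_processed, dma_retries;

/* hand the hardware exactly one element (stubbed out as a printf) */
static void start_xfer(void)
{
	struct sg_element *e = &sg_processing[sg_processed++];

	printf("xfer %d/%d: src=%#x dst=%#x size=%u\n",
	       sg_processed, sg_processing_size, e->src, e->dst, e->size);
}

static void dma_start(void)
{
	memcpy(sg_processing, sg_pending, sizeof(sg_pending[0]) * sg_pending_size);
	sg_processing_size = sg_pending_size;
	sg_pending_size = 0;
	sg_processed = 0;
	dma_retries = 0;
	start_xfer();
}

/* what the DMA-complete interrupt does for each single-element transfer */
static void dma_complete(int error)
{
	if (error) {
		if (dma_retries == 3)
			dma_retries = 0;	/* stop retrying, carry on */
		else {
			sg_processed = 0;	/* retry from the first segment */
			dma_retries++;
		}
	}
	if (sg_processed < sg_processing_size) {
		start_xfer();			/* chain the next single transfer */
		return;
	}
	printf("transfer complete\n");
}

int main(void)
{
	sg_pending[0] = (struct sg_element){ 0x1000, 0x00000, 65536 };
	sg_pending[1] = (struct sg_element){ 0x2000, 0x10000, 65536 };
	sg_pending_size = 2;

	dma_start();		/* starts segment 1 */
	dma_complete(0);	/* segment 1 done -> start segment 2 */
	dma_complete(0);	/* segment 2 done -> transfer complete */
	return 0;
}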
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/media/video/ivtv/ivtv-driver.h	|  22
-rw-r--r--	drivers/media/video/ivtv/ivtv-irq.c	| 234
-rw-r--r--	drivers/media/video/ivtv/ivtv-queue.c	|  63
-rw-r--r--	drivers/media/video/ivtv/ivtv-queue.h	|   8
-rw-r--r--	drivers/media/video/ivtv/ivtv-streams.c	|   2
5 files changed, 196 insertions, 133 deletions
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index f5de2fd01b1a..e80f9f65a905 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -382,7 +382,6 @@ struct ivtv_mailbox_data {
 #define IVTV_F_I_RADIO_USER	5	/* The radio tuner is selected */
 #define IVTV_F_I_DIG_RST	6	/* Reset digitizer */
 #define IVTV_F_I_DEC_YUV	7	/* YUV instead of MPG is being decoded */
-#define IVTV_F_I_ENC_VBI	8	/* VBI DMA */
 #define IVTV_F_I_UPDATE_CC	9	/* CC should be updated */
 #define IVTV_F_I_UPDATE_WSS	10	/* WSS should be updated */
 #define IVTV_F_I_UPDATE_VPS	11	/* VPS should be updated */
@@ -405,7 +404,7 @@ struct ivtv_mailbox_data {
 #define IVTV_F_I_EV_VSYNC_ENABLED	31	/* VSYNC event enabled */
 
 /* Scatter-Gather array element, used in DMA transfers */
-struct ivtv_SG_element {
+struct ivtv_sg_element {
 	u32 src;
 	u32 dst;
 	u32 size;
@@ -417,7 +416,7 @@ struct ivtv_user_dma {
 	struct page *map[IVTV_DMA_SG_OSD_ENT];
 
 	/* Base Dev SG Array for cx23415/6 */
-	struct ivtv_SG_element SGarray[IVTV_DMA_SG_OSD_ENT];
+	struct ivtv_sg_element SGarray[IVTV_DMA_SG_OSD_ENT];
 	dma_addr_t SG_handle;
 	int SG_length;
 
@@ -468,6 +467,10 @@ struct ivtv_stream {
 	int dma;			/* can be PCI_DMA_TODEVICE,
 					   PCI_DMA_FROMDEVICE or
 					   PCI_DMA_NONE */
+	u32 pending_offset;
+	u32 pending_backup;
+	u64 pending_pts;
+
 	u32 dma_offset;
 	u32 dma_backup;
 	u64 dma_pts;
@@ -493,10 +496,13 @@ struct ivtv_stream {
 	u16 dma_xfer_cnt;
 
 	/* Base Dev SG Array for cx23415/6 */
-	struct ivtv_SG_element *SGarray;
-	struct ivtv_SG_element *PIOarray;
-	dma_addr_t SG_handle;
-	int SG_length;
+	struct ivtv_sg_element *sg_pending;
+	struct ivtv_sg_element *sg_processing;
+	struct ivtv_sg_element *sg_dma;
+	dma_addr_t sg_handle;
+	int sg_pending_size;
+	int sg_processing_size;
+	int sg_processed;
 
 	/* SG List of Buffers */
 	struct scatterlist *SGlist;
@@ -637,7 +643,6 @@ struct vbi_info {
 	u32 enc_start, enc_size;
 	int fpi;
 	u32 frame;
-	u32 dma_offset;
 	u8 cc_data_odd[256];
 	u8 cc_data_even[256];
 	int cc_pos;
@@ -724,6 +729,7 @@ struct ivtv {
 	int cur_pio_stream;		/* index of stream doing PIO */
 	u32 dma_data_req_offset;
 	u32 dma_data_req_size;
+	int dma_retries;
 	int output_mode;		/* NONE, MPG, YUV, UDMA YUV, passthrough */
 	spinlock_t lock;		/* lock access to this struct */
 	int search_pack_header;
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 8644f3dda31e..9695e5356163 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -60,18 +60,18 @@ static void ivtv_pio_work_handler(struct ivtv *itv)
 	buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
 	list_for_each(p, &s->q_dma.list) {
 		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
-		u32 size = s->PIOarray[i].size & 0x3ffff;
+		u32 size = s->sg_processing[i].size & 0x3ffff;
 
 		/* Copy the data from the card to the buffer */
 		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
-			memcpy_fromio(buf->buf, itv->dec_mem + s->PIOarray[i].src - IVTV_DECODER_OFFSET, size);
+			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
 		}
 		else {
-			memcpy_fromio(buf->buf, itv->enc_mem + s->PIOarray[i].src, size);
+			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
 		}
-		if (s->PIOarray[i].size & 0x80000000)
-			break;
 		i++;
+		if (i == s->sg_processing_size)
+			break;
 	}
 	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
 }
@@ -105,7 +105,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 	u32 offset, size;
 	u32 UVoffset = 0, UVsize = 0;
 	int skip_bufs = s->q_predma.buffers;
-	int idx = s->SG_length;
+	int idx = s->sg_pending_size;
 	int rc;
 
 	/* sanity checks */
@@ -123,7 +123,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 	case IVTV_ENC_STREAM_TYPE_MPG:
 		offset = data[1];
 		size = data[2];
-		s->dma_pts = 0;
+		s->pending_pts = 0;
 		break;
 
 	case IVTV_ENC_STREAM_TYPE_YUV:
@@ -131,13 +131,13 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 		size = data[2];
 		UVoffset = data[3];
 		UVsize = data[4];
-		s->dma_pts = ((u64) data[5] << 32) | data[6];
+		s->pending_pts = ((u64) data[5] << 32) | data[6];
 		break;
 
 	case IVTV_ENC_STREAM_TYPE_PCM:
 		offset = data[1] + 12;
 		size = data[2] - 12;
-		s->dma_pts = read_dec(offset - 8) |
+		s->pending_pts = read_dec(offset - 8) |
 			((u64)(read_dec(offset - 12)) << 32);
 		if (itv->has_cx23415)
 			offset += IVTV_DECODER_OFFSET;
@@ -150,13 +150,13 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 			IVTV_DEBUG_INFO("VBI offset == 0\n");
 			return -1;
 		}
-		s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
+		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
 		break;
 
 	case IVTV_DEC_STREAM_TYPE_VBI:
 		size = read_dec(itv->vbi.dec_start + 4) + 8;
 		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
-		s->dma_pts = 0;
+		s->pending_pts = 0;
 		offset += IVTV_DECODER_OFFSET;
 		break;
 	default:
@@ -165,17 +165,17 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 	}
 
 	/* if this is the start of the DMA then fill in the magic cookie */
-	if (s->SG_length == 0) {
+	if (s->sg_pending_size == 0) {
 		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
 		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
-			s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET);
+			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
 			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
 		}
 		else {
-			s->dma_backup = read_enc(offset);
+			s->pending_backup = read_enc(offset);
 			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
 		}
-		s->dma_offset = offset;
+		s->pending_offset = offset;
 	}
 
 	bytes_needed = size;
@@ -202,7 +202,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 	}
 	s->buffers_stolen = rc;
 
-	/* got the buffers, now fill in SGarray (DMA) */
+	/* got the buffers, now fill in sg_pending */
 	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
 	memset(buf->buf, 0, 128);
 	list_for_each(p, &s->q_predma.list) {
@@ -210,9 +210,9 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 
 		if (skip_bufs-- > 0)
 			continue;
-		s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
-		s->SGarray[idx].src = cpu_to_le32(offset);
-		s->SGarray[idx].size = cpu_to_le32(s->buf_size);
+		s->sg_pending[idx].dst = buf->dma_handle;
+		s->sg_pending[idx].src = offset;
+		s->sg_pending[idx].size = s->buf_size;
 		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
 		buf->dma_xfer_cnt = s->dma_xfer_cnt;
 
@@ -230,7 +230,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 		}
 		idx++;
 	}
-	s->SG_length = idx;
+	s->sg_pending_size = idx;
 	return 0;
 }
 
@@ -332,9 +332,9 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
 			offset = uv_offset;
 			y_done = 1;
 		}
-		s->SGarray[idx].src = cpu_to_le32(buf->dma_handle);
-		s->SGarray[idx].dst = cpu_to_le32(offset);
-		s->SGarray[idx].size = cpu_to_le32(buf->bytesused);
+		s->sg_pending[idx].src = buf->dma_handle;
+		s->sg_pending[idx].dst = offset;
+		s->sg_pending[idx].size = buf->bytesused;
 
 		offset += buf->bytesused;
 		bytes_written += buf->bytesused;
@@ -343,10 +343,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
 		ivtv_buf_sync_for_device(s, buf);
 		idx++;
 	}
-	s->SG_length = idx;
-
-	/* Mark last buffer size for Interrupt flag */
-	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
+	s->sg_pending_size = idx;
 
 	/* Sync Hardware SG List of buffers */
 	ivtv_stream_sync_for_device(s);
@@ -362,6 +359,34 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
 	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
 }
 
+static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
+{
+	struct ivtv *itv = s->itv;
+
+	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
+	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
+	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
+	s->sg_processed++;
+	/* Sync Hardware SG List of buffers */
+	ivtv_stream_sync_for_device(s);
+	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
+	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
+}
+
+static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
+{
+	struct ivtv *itv = s->itv;
+
+	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
+	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
+	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
+	s->sg_processed++;
+	/* Sync Hardware SG List of buffers */
+	ivtv_stream_sync_for_device(s);
+	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
+	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
+}
+
 /* start the encoder DMA */
 static void ivtv_dma_enc_start(struct ivtv_stream *s)
 {
@@ -375,8 +400,7 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
 	ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
 
 	if (ivtv_use_dma(s))
-		s->SGarray[s->SG_length - 1].size =
-			cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);
+		s->sg_pending[s->sg_pending_size - 1].size += 256;
 
 	/* If this is an MPEG stream, and VBI data is also pending, then append the
 	   VBI DMA to the MPEG DMA and transfer both sets of data at once.
@@ -387,45 +411,39 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
 	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
 	   use. This way no conflicts occur. */
 	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
-	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
-	    s->SG_length + s_vbi->SG_length <= s->buffers) {
+	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
+	    s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
 		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
 		if (ivtv_use_dma(s_vbi))
-			s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s->SG_length - 1].size) + 256);
-		for (i = 0; i < s_vbi->SG_length; i++) {
-			s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
+			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
+		for (i = 0; i < s_vbi->sg_pending_size; i++) {
+			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
 		}
-		itv->vbi.dma_offset = s_vbi->dma_offset;
-		s_vbi->SG_length = 0;
+		s_vbi->dma_offset = s_vbi->pending_offset;
+		s_vbi->sg_pending_size = 0;
 		s_vbi->dma_xfer_cnt++;
 		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
 		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
 	}
 
-	/* Mark last buffer size for Interrupt flag */
-	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
 	s->dma_xfer_cnt++;
-
-	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
-		set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
-	else
-		clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
+	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
+	s->sg_processing_size = s->sg_pending_size;
+	s->sg_pending_size = 0;
+	s->sg_processed = 0;
+	s->dma_offset = s->pending_offset;
+	s->dma_backup = s->pending_backup;
+	s->dma_pts = s->pending_pts;
 
 	if (ivtv_use_pio(s)) {
-		for (i = 0; i < s->SG_length; i++) {
-			s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
-			s->PIOarray[i].size = le32_to_cpu(s->SGarray[i].size);
-		}
 		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
 		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
 		set_bit(IVTV_F_I_PIO, &itv->i_flags);
 		itv->cur_pio_stream = s->type;
 	}
 	else {
-		/* Sync Hardware SG List of buffers */
-		ivtv_stream_sync_for_device(s);
-		write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
-		write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
+		itv->dma_retries = 0;
+		ivtv_dma_enc_start_xfer(s);
 		set_bit(IVTV_F_I_DMA, &itv->i_flags);
 		itv->cur_dma_stream = s->type;
 		itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
@@ -439,10 +457,15 @@ static void ivtv_dma_dec_start(struct ivtv_stream *s)
 
 	if (s->q_predma.bytesused)
 		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
+	s->dma_xfer_cnt++;
+	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
+	s->sg_processing_size = s->sg_pending_size;
+	s->sg_pending_size = 0;
+	s->sg_processed = 0;
+
 	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
-	/* put SG Handle into register 0x0c */
-	write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
-	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
+	itv->dma_retries = 0;
+	ivtv_dma_dec_start_xfer(s);
 	set_bit(IVTV_F_I_DMA, &itv->i_flags);
 	itv->cur_dma_stream = s->type;
 	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
@@ -453,27 +476,42 @@ static void ivtv_irq_dma_read(struct ivtv *itv)
 {
 	struct ivtv_stream *s = NULL;
 	struct ivtv_buffer *buf;
-	int hw_stream_type;
+	int hw_stream_type = 0;
 
 	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
-	del_timer(&itv->dma_timer);
-	if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
-		IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
-		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
+	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
+		del_timer(&itv->dma_timer);
+		return;
 	}
+
 	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
-		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
-			s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
-			hw_stream_type = 2;
+		s = &itv->streams[itv->cur_dma_stream];
+		ivtv_stream_sync_for_cpu(s);
+
+		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
+			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
+					read_reg(IVTV_REG_DMASTATUS),
+					s->sg_processed, s->sg_processing_size, itv->dma_retries);
+			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
+			if (itv->dma_retries == 3) {
+				itv->dma_retries = 0;
+			}
+			else {
+				/* Retry, starting with the first xfer segment.
+				   Just retrying the current segment is not sufficient. */
+				s->sg_processed = 0;
+				itv->dma_retries++;
+			}
 		}
-		else {
-			s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
-			hw_stream_type = 0;
+		if (s->sg_processed < s->sg_processing_size) {
+			/* DMA next buffer */
+			ivtv_dma_dec_start_xfer(s);
+			return;
 		}
+		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
+			hw_stream_type = 2;
 		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
 
-		ivtv_stream_sync_for_cpu(s);
-
 		/* For some reason must kick the firmware, like PIO mode,
 		   I think this tells the firmware we are done and the size
 		   of the xfer so it can calculate what we need next.
@@ -490,6 +528,7 @@ static void ivtv_irq_dma_read(struct ivtv *itv)
 		}
 		wake_up(&s->waitq);
 	}
+	del_timer(&itv->dma_timer);
 	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
 	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
 	itv->cur_dma_stream = -1;
@@ -501,33 +540,44 @@ static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
 	u32 data[CX2341X_MBOX_MAX_DATA];
 	struct ivtv_stream *s;
 
-	del_timer(&itv->dma_timer);
 	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
-	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
-	if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
-		data[1] = 3;
-	else if (data[1] > 2)
+	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
+	if (itv->cur_dma_stream < 0) {
+		del_timer(&itv->dma_timer);
 		return;
-	s = &itv->streams[ivtv_stream_map[data[1]]];
+	}
+	s = &itv->streams[itv->cur_dma_stream];
+	ivtv_stream_sync_for_cpu(s);
+
 	if (data[0] & 0x18) {
-		IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]);
+		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
+				s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
 		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
-		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]);
+		if (itv->dma_retries == 3) {
+			itv->dma_retries = 0;
+		}
+		else {
+			/* Retry, starting with the first xfer segment.
+			   Just retrying the current segment is not sufficient. */
+			s->sg_processed = 0;
+			itv->dma_retries++;
+		}
 	}
-	s->SG_length = 0;
+	if (s->sg_processed < s->sg_processing_size) {
+		/* DMA next buffer */
+		ivtv_dma_enc_start_xfer(s);
+		return;
+	}
+	del_timer(&itv->dma_timer);
 	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
 	itv->cur_dma_stream = -1;
 	dma_post(s);
-	ivtv_stream_sync_for_cpu(s);
 	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
-		u32 tmp;
-
 		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
-		tmp = s->dma_offset;
-		s->dma_offset = itv->vbi.dma_offset;
 		dma_post(s);
-		s->dma_offset = tmp;
 	}
+	s->sg_processing_size = 0;
+	s->sg_processed = 0;
 	wake_up(&itv->dma_waitq);
 }
 
@@ -541,8 +591,7 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
 	}
 	s = &itv->streams[itv->cur_pio_stream];
 	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
-	s->SG_length = 0;
-	clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
+	s->sg_pending_size = 0;
 	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
 	itv->cur_pio_stream = -1;
 	dma_post(s);
@@ -554,13 +603,8 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
 	ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
 	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
 	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
-		u32 tmp;
-
 		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
-		tmp = s->dma_offset;
-		s->dma_offset = itv->vbi.dma_offset;
 		dma_post(s);
-		s->dma_offset = tmp;
 	}
 	wake_up(&itv->dma_waitq);
 }
@@ -572,19 +616,23 @@ static void ivtv_irq_dma_err(struct ivtv *itv)
 	del_timer(&itv->dma_timer);
 	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
 	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
 				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
+	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
 	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
 	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
 		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
 
 		/* retry */
-		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
 		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
 			ivtv_dma_dec_start(s);
 		else
 			ivtv_dma_enc_start(s);
 		return;
 	}
+	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
+		ivtv_udma_start(itv);
+		return;
+	}
 	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
 	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
 	itv->cur_dma_stream = -1;
@@ -628,14 +676,14 @@ static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
 	   DMA the data. Since at most four VBI DMA buffers are available,
 	   we just drop the old requests when there are already three
 	   requests queued. */
-	if (s->SG_length > 2) {
+	if (s->sg_pending_size > 2) {
 		struct list_head *p;
 		list_for_each(p, &s->q_predma.list) {
 			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
 			ivtv_buf_sync_for_cpu(s, buf);
 		}
 		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
-		s->SG_length = 0;
+		s->sg_pending_size = 0;
 	}
 	/* if we can append the data, and the MPEG stream isn't capturing,
 	   then start a DMA request for just the VBI data. */
diff --git a/drivers/media/video/ivtv/ivtv-queue.c b/drivers/media/video/ivtv/ivtv-queue.c
index bff75aeee0a0..d9a1478ca1f7 100644
--- a/drivers/media/video/ivtv/ivtv-queue.c
+++ b/drivers/media/video/ivtv/ivtv-queue.c
@@ -195,7 +195,7 @@ void ivtv_flush_queues(struct ivtv_stream *s)
 int ivtv_stream_alloc(struct ivtv_stream *s)
 {
 	struct ivtv *itv = s->itv;
-	int SGsize = sizeof(struct ivtv_SG_element) * s->buffers;
+	int SGsize = sizeof(struct ivtv_sg_element) * s->buffers;
 	int i;
 
 	if (s->buffers == 0)
@@ -205,27 +205,33 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
 		s->dma != PCI_DMA_NONE ? "DMA " : "",
 		s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);
 
-	if (ivtv_might_use_pio(s)) {
-		s->PIOarray = (struct ivtv_SG_element *)kzalloc(SGsize, GFP_KERNEL);
-		if (s->PIOarray == NULL) {
-			IVTV_ERR("Could not allocate PIOarray for %s stream\n", s->name);
-			return -ENOMEM;
-		}
+	s->sg_pending = (struct ivtv_sg_element *)kzalloc(SGsize, GFP_KERNEL);
+	if (s->sg_pending == NULL) {
+		IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
+		return -ENOMEM;
 	}
+	s->sg_pending_size = 0;
 
-	/* Allocate DMA SG Arrays */
-	s->SGarray = (struct ivtv_SG_element *)kzalloc(SGsize, GFP_KERNEL);
-	if (s->SGarray == NULL) {
-		IVTV_ERR("Could not allocate SGarray for %s stream\n", s->name);
-		if (ivtv_might_use_pio(s)) {
-			kfree(s->PIOarray);
-			s->PIOarray = NULL;
-		}
+	s->sg_processing = (struct ivtv_sg_element *)kzalloc(SGsize, GFP_KERNEL);
+	if (s->sg_processing == NULL) {
+		IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
+		kfree(s->sg_pending);
+		s->sg_pending = NULL;
+		return -ENOMEM;
+	}
+	s->sg_processing_size = 0;
+
+	s->sg_dma = (struct ivtv_sg_element *)kzalloc(sizeof(struct ivtv_sg_element), GFP_KERNEL);
+	if (s->sg_dma == NULL) {
+		IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
+		kfree(s->sg_pending);
+		s->sg_pending = NULL;
+		kfree(s->sg_processing);
+		s->sg_processing = NULL;
 		return -ENOMEM;
 	}
-	s->SG_length = 0;
 	if (ivtv_might_use_dma(s)) {
-		s->SG_handle = pci_map_single(itv->dev, s->SGarray, SGsize, s->dma);
+		s->sg_handle = pci_map_single(itv->dev, s->sg_dma, sizeof(struct ivtv_sg_element), s->dma);
 		ivtv_stream_sync_for_cpu(s);
 	}
 
@@ -272,16 +278,19 @@ void ivtv_stream_free(struct ivtv_stream *s)
 	}
 
 	/* Free SG Array/Lists */
-	if (s->SGarray != NULL) {
-		if (s->SG_handle != IVTV_DMA_UNMAPPED) {
-			pci_unmap_single(s->itv->dev, s->SG_handle,
-				sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE);
-			s->SG_handle = IVTV_DMA_UNMAPPED;
+	if (s->sg_dma != NULL) {
+		if (s->sg_handle != IVTV_DMA_UNMAPPED) {
+			pci_unmap_single(s->itv->dev, s->sg_handle,
+				sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
+			s->sg_handle = IVTV_DMA_UNMAPPED;
 		}
-		kfree(s->SGarray);
-		kfree(s->PIOarray);
-		s->PIOarray = NULL;
-		s->SGarray = NULL;
-		s->SG_length = 0;
+		kfree(s->sg_pending);
+		kfree(s->sg_processing);
+		kfree(s->sg_dma);
+		s->sg_pending = NULL;
+		s->sg_processing = NULL;
+		s->sg_dma = NULL;
+		s->sg_pending_size = 0;
+		s->sg_processing_size = 0;
 	}
 }
diff --git a/drivers/media/video/ivtv/ivtv-queue.h b/drivers/media/video/ivtv/ivtv-queue.h
index 2ed8d548255d..14a9f7fe50aa 100644
--- a/drivers/media/video/ivtv/ivtv-queue.h
+++ b/drivers/media/video/ivtv/ivtv-queue.h
@@ -79,13 +79,13 @@ void ivtv_stream_free(struct ivtv_stream *s);
 static inline void ivtv_stream_sync_for_cpu(struct ivtv_stream *s)
 {
 	if (ivtv_use_dma(s))
-		pci_dma_sync_single_for_cpu(s->itv->dev, s->SG_handle,
-			sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE);
+		pci_dma_sync_single_for_cpu(s->itv->dev, s->sg_handle,
+			sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
 }
 
 static inline void ivtv_stream_sync_for_device(struct ivtv_stream *s)
 {
 	if (ivtv_use_dma(s))
-		pci_dma_sync_single_for_device(s->itv->dev, s->SG_handle,
-			sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE);
+		pci_dma_sync_single_for_device(s->itv->dev, s->sg_handle,
+			sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
 }
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 0582b9d57c55..d1cc366c2a35 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -154,7 +154,7 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
 	spin_lock_init(&s->qlock);
 	init_waitqueue_head(&s->waitq);
 	s->id = -1;
-	s->SG_handle = IVTV_DMA_UNMAPPED;
+	s->sg_handle = IVTV_DMA_UNMAPPED;
 	ivtv_queue_init(&s->q_free);
 	ivtv_queue_init(&s->q_full);
 	ivtv_queue_init(&s->q_dma);