Diffstat (limited to 'drivers/media/video/ivtv/ivtv-irq.c')
-rw-r--r-- drivers/media/video/ivtv/ivtv-irq.c | 838
1 file changed, 838 insertions, 0 deletions
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
new file mode 100644
index 000000000000..c3a047b381b3
--- /dev/null
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -0,0 +1,838 @@
/* interrupt handling
   Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
   Copyright (C) 2004 Chris Kennedy <c@groovy.org>
   Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "ivtv-driver.h"
#include "ivtv-firmware.h"
#include "ivtv-fileops.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-ioctl.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

#define DMA_MAGIC_COOKIE 0x000001fe

#define SLICED_VBI_PIO 1

static void ivtv_dma_dec_start(struct ivtv_stream *s);

static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
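
/* The encoder firmware identifies the stream involved in a DMA request by a
   small index (0 = MPG, 1 = YUV, 2 = PCM, 3 = VBI) in its mailbox data;
   ivtv_irq_enc_start_cap() and ivtv_irq_enc_dma_complete() below use this
   map to turn that index into one of our stream structures. */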

static inline int ivtv_use_pio(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	return s->dma == PCI_DMA_NONE ||
	    (SLICED_VBI_PIO && s->type == IVTV_ENC_STREAM_TYPE_VBI && itv->vbi.sliced_in->service_set);
}
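
/* Note: a stream is copied by the CPU (PIO) instead of by the DMA engine
   either when it has no DMA direction at all, or when it is sliced VBI
   capture and SLICED_VBI_PIO is set. */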

void ivtv_irq_work_handler(struct work_struct *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

	DEFINE_WAIT(wait);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and, in case a PIO transfer is required for this stream, actually copy the
   data from the card to the buffers.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->SG_length;
	int rc;

	/* sanity checks */
	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->dma_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->dma_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->dma_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->dma_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->SG_length == 0) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->dma_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->dma_offset = offset;
	}
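	/* The cookie just written is located again in dma_post(): it marks
	   where the transferred data really begins, and the word saved in
	   dma_backup is written back once the transfer has completed. */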

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}
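	/* (Worked example, assuming a 32 KiB buf_size: a 720x480 Y plane of
	   345600 bytes rounds up to 11 * 32768 = 360448 bytes, so the UV
	   samples start exactly at the twelfth buffer.) */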

	IVTV_DEBUG_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in SGarray (DMA) or copy the data from the card
	   to the buffers (PIO). */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		if (skip_bufs-- > 0)
			continue;
		if (!ivtv_use_pio(s)) {
			s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
			s->SGarray[idx].src = cpu_to_le32(offset);
			s->SGarray[idx].size = cpu_to_le32(s->buf_size);
		}
		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;

		/* If PIO, then copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + offset - IVTV_DECODER_OFFSET, buf->bytesused);
		}
		else if (ivtv_use_pio(s)) {
			memcpy_fromio(buf->buf, itv->enc_mem + offset, buf->bytesused);
		}

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->SG_length = idx;
	return 0;
}
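
/* stream_enc_dma_append() returns -1 when nothing could be set up; on
   success the data is either already copied into the buffers (PIO) or
   described in SGarray, waiting for the DMA engine to be started. */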

static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	u32 *u32buf;
	int x = 0;

	if (ivtv_use_pio(s)) {
		if (s->q_predma.bytesused)
			ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
		s->SG_length = 0;
	}
	IVTV_DEBUG_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (u32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
			{
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
			    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				/* source and destination overlap, so memmove
				   is required here */
				memmove(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			set_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags);
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		/* Parse and Groom VBI Data */
		s->q_dma.bytesused -= buf->bytesused;
		ivtv_process_vbi_data(itv, buf, 0, s->type);
		s->q_dma.bytesused += buf->bytesused;
		if (s->id == -1) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->id != -1)
		wake_up(&s->waitq);
}

void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 y_size = itv->params.height * itv->params.width;
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
			offset = uv_offset;
			y_done = 1;
		}
		s->SGarray[idx].src = cpu_to_le32(buf->dma_handle);
		s->SGarray[idx].dst = cpu_to_le32(offset);
		s->SGarray[idx].size = cpu_to_le32(buf->bytesused);

		offset += buf->bytesused;
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->SG_length = idx;

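	/* The top bit of an SG element's size field appears to tell the
	   firmware that this is the final element of the list and that it
	   should raise the DMA-complete interrupt once the element has been
	   transferred. */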
	/* Mark last buffer size for Interrupt flag */
	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	IVTV_DEBUG_DMA("start DMA for %s\n", s->name);
	s->SGarray[s->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
	    s->SG_length + s_vbi->SG_length <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s_vbi->SG_length - 1].size) + 256);
		for (i = 0; i < s_vbi->SG_length; i++) {
			s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
		}
		itv->vbi.dma_offset = s_vbi->dma_offset;
		s_vbi->SG_length = 0;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_DMA("include DMA for %s\n", s->name);
	}
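	/* The VBI SG entries now ride along with the MPEG transfer;
	   ivtv_irq_enc_dma_complete() uses IVTV_F_S_DMA_HAS_VBI and
	   itv->vbi.dma_offset to run dma_post() for the VBI stream too. */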

	/* Mark last buffer size for Interrupt flag */
	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	itv->dma_timer.expires = jiffies + HZ / 10;
	add_timer(&itv->dma_timer);
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	IVTV_DEBUG_DMA("start DMA for %s\n", s->name);
	/* put SG Handle into register 0x0c */
	write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	itv->dma_timer.expires = jiffies + HZ / 10;
	add_timer(&itv->dma_timer);
}

static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type;

	IVTV_DEBUG_IRQ("DEC DMA READ\n");
	del_timer(&itv->dma_timer);
	if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
		IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	}
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
			hw_stream_type = 2;
		}
		else {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
			hw_stream_type = 0;
		}
		IVTV_DEBUG_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		ivtv_stream_sync_for_cpu(s);

		/* For some reason we must kick the firmware here, as in PIO
		   mode; presumably this tells it that we are done and how big
		   the transfer was, so it can calculate what it needs next.
		   We could probably do that part ourselves, but we would have
		   to compute the transfer info fully and stop relying on
		   interrupts. */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
	if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
		data[1] = 3;
	else if (data[1] > 2)
		return;
	s = &itv->streams[ivtv_stream_map[data[1]]];
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]);
	}
	s->SG_length = 0;
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	ivtv_stream_sync_for_cpu(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		u32 tmp;

		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		tmp = s->dma_offset;
		s->dma_offset = itv->vbi.dma_offset;
		dma_post(s);
		s->dma_offset = tmp;
	}
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
			data[0], data[1], data[2]);
		return;
	}
	clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		if (ivtv_use_pio(s)) {
			dma_post(s);
			ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[0]);
		}
		else {
			set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
		}
	}
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (ivtv_use_pio(s)) {
		if (stream_enc_dma_append(s, data))
			return;
		if (s->q_predma.bytesused)
			ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
		s->SG_length = 0;
		dma_post(s);
		return;
	}
	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->SG_length > 2) {
		struct list_head *p;
		list_for_each(p, &s->q_predma.list) {
			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->SG_length = 0;
	}
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
	    !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_dev_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
	    !stream_enc_dma_append(s, data)) {
		dma_post(s);
	}
}

static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
		itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
			itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(0x28c0) & 1;
	int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ itv->yuv_info.lace_sync_field) == 0 && ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.lace_sync_field)) ||
	    (frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
			write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
			write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
			write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
			write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
			next_dma_frame = (next_dma_frame + 1) & 0x3;
			atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
		}
	}
	if (frame != (itv->lastVsyncFrame & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);
		int work = 0;

		itv->lastVsyncFrame += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			work = 1;
		}

		/* Check if we need to update the yuv registers */
		if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
			if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
				last_dma_frame = (last_dma_frame - 1) & 3;

			if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
				itv->yuv_info.update_frame = last_dma_frame;
				itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
				itv->yuv_info.yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				work = 1;
			}
		}
		if (work)
			queue_work(itv->irq_work_queues, &itv->irq_work_queue);
	}
}

#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)
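
/* All interrupt sources that may have set IVTV_F_S_DMA_PENDING on a stream;
   checked at the bottom of ivtv_irq_handler() to decide whether a queued
   transfer can now be started. */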

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dev_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->cap_w);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

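	/* DMA engine idle again: start the next pending transfer. The
	   irq_rr_idx rotation makes the starting stream round-robin, so no
	   single stream can starve the others. */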
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
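
/* DMA watchdog, armed by ivtv_dma_enc_start()/ivtv_dma_dec_start() to fire
   HZ/10 jiffies (100 ms) after a transfer is kicked off. If the completion
   interrupt never arrives, reset the DMA state so the driver cannot wedge
   waiting for it. */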
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}