author	Andy Walls <awalls@radix.net>	2008-11-18 23:24:33 -0500
committer	Mauro Carvalho Chehab <mchehab@redhat.com>	2008-12-30 06:38:10 -0500
commit	bca11a5721917d6d5874571813673a2669ffec4b (patch)
tree	a2202bdf236804a123175b12585f4aae4fdc58d7 /drivers/media
parent	d6c7e5f8faad080e75bace5c4f2265e3513e3510 (diff)
V4L/DVB (9726): cx18: Restore buffers that have fallen out of the transfer rotation
Restore buffers that have fallen out of the transfer rotation, and check for
coherent mailbox data when processing a stale mailbox.

Signed-off-by: Andy Walls <awalls@radix.net>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
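As a rough, self-contained illustration of the lost-buffer heuristic this change adds to cx18_queue_get_buf(): a free buffer that has been passed over roughly a full cycle of the free queue without ever being the one the firmware acked cannot merely be waiting its turn, so it is treated as having fallen out of the transfer rotation. The sketch below is a hypothetical simplification (demo_buf, get_buf(), a fixed-size array instead of the driver's q_free list and locking), not the driver's code:

/*
 * Hypothetical sketch of the "skipped" heuristic only; simplified
 * stand-ins, not the cx18 driver's actual structures or locking.
 */
#include <stdio.h>

#define NFREE 4	/* pretend the stream owns 4 free buffers */

struct demo_buf {
	int id;
	int skipped;	/* times passed over while searching the free queue */
};

/*
 * Look up the buffer the firmware just acked.  A buffer that has been
 * passed over at least (NFREE - 1) times cannot merely be waiting its
 * turn any more, so report it as fallen out of the transfer rotation.
 */
static struct demo_buf *get_buf(struct demo_buf q_free[], int acked_id)
{
	struct demo_buf *found = NULL;
	int i;

	for (i = 0; i < NFREE; i++) {
		struct demo_buf *buf = &q_free[i];

		if (buf->id != acked_id) {
			buf->skipped++;
			if (buf->skipped >= NFREE - 1)
				printf("buffer %d dropped out of rotation\n",
				       buf->id);
			continue;
		}
		buf->skipped = 0;	/* a match resets the counter */
		found = buf;
	}
	return found;
}

int main(void)
{
	struct demo_buf q_free[NFREE] = { {1, 0}, {2, 0}, {3, 0}, {4, 0} };
	int i;

	/* The firmware only ever acks buffer 2; 1, 3 and 4 get flagged. */
	for (i = 0; i < NFREE; i++)
		get_buf(q_free, 2);
	return 0;
}

The patch applies the same idea against the live q_free buffer count and, for each buffer flagged this way, re-queues it and re-submits its MDL to the firmware via CX18_CPU_DE_SET_MDL.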
Diffstat (limited to 'drivers/media')
-rw-r--r--	drivers/media/video/cx18/cx18-driver.h	 1
-rw-r--r--	drivers/media/video/cx18/cx18-mailbox.c	59
-rw-r--r--	drivers/media/video/cx18/cx18-queue.c	59
3 files changed, 92 insertions, 27 deletions
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index cad352aeb837..f06290d32ecf 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -212,6 +212,7 @@ struct cx18_buffer {
 	dma_addr_t dma_handle;
 	u32 id;
 	unsigned long b_flags;
+	unsigned skipped;
 	char *buf;
 
 	u32 bytesused;
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index e5d4f3112293..abd39aaa345c 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -120,7 +120,7 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
 
 static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
 {
-	u32 handle, mdl_ack_count;
+	u32 handle, mdl_ack_count, id;
 	struct cx18_mailbox *mb;
 	struct cx18_mdl_ack *mdl_ack;
 	struct cx18_stream *s;
@@ -133,19 +133,50 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
 
 	if (s == NULL) {
 		CX18_WARN("Got DMA done notification for unknown/inactive"
-			  " handle %d\n", handle);
+			  " handle %d, %s mailbox seq no %d\n", handle,
+			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
+			  "stale" : "good", mb->request);
 		return;
 	}
 
 	mdl_ack_count = mb->args[2];
 	mdl_ack = order->mdl_ack;
 	for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
-		buf = cx18_queue_get_buf(s, mdl_ack->id, mdl_ack->data_used);
-		CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name,
-				  mdl_ack->id);
+		id = mdl_ack->id;
+		/*
+		 * Simple integrity check for processing a stale (and possibly
+		 * inconsistent mailbox): make sure the buffer id is in the
+		 * valid range for the stream.
+		 *
+		 * We go through the trouble of dealing with stale mailboxes
+		 * because most of the time, the mailbox data is still valid and
+		 * unchanged (and in practice the firmware ping-pongs the
+		 * two mdl_ack buffers so mdl_acks are not stale).
+		 *
+		 * There are occasions when we get a half changed mailbox,
+		 * which this check catches for a handle & id mismatch. If the
+		 * handle and id do correspond, the worst case is that we
+		 * completely lost the old buffer, but pick up the new buffer
+		 * early (but the new mdl_ack is guaranteed to be good in this
+		 * case as the firmware wouldn't point us to a new mdl_ack until
+		 * it's filled in).
+		 *
+		 * cx18_queue_get_buf() will detect the lost buffers
+		 * and put them back in rotation eventually.
+		 */
+		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
+		    !(id >= s->mdl_offset &&
+		      id < (s->mdl_offset + s->buffers))) {
+			CX18_WARN("Fell behind! Ignoring stale mailbox with "
+				  " inconsistent data. Lost buffer for mailbox "
+				  "seq no %d\n", mb->request);
+			break;
+		}
+		buf = cx18_queue_get_buf(s, id, mdl_ack->data_used);
+		CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
 		if (buf == NULL) {
 			CX18_WARN("Could not find buf %d for stream %s\n",
-				  mdl_ack->id, s->name);
+				  id, s->name);
 			continue;
 		}
 
@@ -158,6 +189,7 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
 					  buf->bytesused);
 
 			cx18_buf_sync_for_device(s, buf);
+			cx18_enqueue(s, buf, &s->q_free);
 
 			if (s->handle != CX18_INVALID_TASK_HANDLE &&
 			    test_bit(CX18_F_S_STREAMING, &s->s_flags))
@@ -257,10 +289,10 @@ static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 	/* Don't ack if the RPU has gotten impatient and timed us out */
 	if (req != cx18_readl(cx, &ack_mb->request) ||
 	    req == cx18_readl(cx, &ack_mb->ack)) {
-		CX18_WARN("Possibly falling behind: %s self-ack'ed our incoming"
-			  " %s to EPU mailbox (sequence no. %u) while "
-			  "processing\n",
-			  rpu_str[order->rpu], rpu_str[order->rpu], req);
+		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
+				"incoming %s to EPU mailbox (sequence no. %u) "
+				"while processing\n",
+				rpu_str[order->rpu], rpu_str[order->rpu], req);
 		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
 		return;
 	}
@@ -407,9 +439,10 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
 		      2 * sizeof(u32));
 
 	if (order_mb->request == order_mb->ack) {
-		CX18_WARN("Possibly falling behind: %s self-ack'ed our incoming"
-			  " %s to EPU mailbox (sequence no. %u)\n",
-			  rpu_str[rpu], rpu_str[rpu], order_mb->request);
+		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
+				"incoming %s to EPU mailbox (sequence no. %u)"
+				"\n",
+				rpu_str[rpu], rpu_str[rpu], order_mb->request);
 		dump_mb(cx, order_mb, "incoming");
 		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
 	}
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
index 5a3839403631..ff6df36328fd 100644
--- a/drivers/media/video/cx18/cx18-queue.c
+++ b/drivers/media/video/cx18/cx18-queue.c
@@ -49,6 +49,7 @@ void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
 		buf->bytesused = 0;
 		buf->readpos = 0;
 		buf->b_flags = 0;
+		buf->skipped = 0;
 	}
 	mutex_lock(&s->qlock);
 	list_add_tail(&buf->list, &q->list);
@@ -67,6 +68,7 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
 		list_del_init(q->list.next);
 		atomic_dec(&q->buffers);
 		q->bytesused -= buf->bytesused - buf->readpos;
+		buf->skipped = 0;
 	}
 	mutex_unlock(&s->qlock);
 	return buf;
@@ -76,34 +78,63 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
 	u32 bytesused)
 {
 	struct cx18 *cx = s->cx;
-	struct list_head *p;
+	struct cx18_buffer *buf;
+	struct cx18_buffer *ret = NULL;
+	struct list_head *p, *t;
+	LIST_HEAD(r);
 
 	mutex_lock(&s->qlock);
-	list_for_each(p, &s->q_free.list) {
-		struct cx18_buffer *buf =
-			list_entry(p, struct cx18_buffer, list);
+	list_for_each_safe(p, t, &s->q_free.list) {
+		buf = list_entry(p, struct cx18_buffer, list);
 
 		if (buf->id != id) {
-			CX18_DEBUG_HI_DMA("Skipping buffer %d searching for %d "
-					  "in stream %s q_free\n", buf->id, id,
-					  s->name);
+			buf->skipped++;
+			if (buf->skipped >= atomic_read(&s->q_free.buffers)-1) {
+				/* buffer must have fallen out of rotation */
+				atomic_dec(&s->q_free.buffers);
+				list_move_tail(&buf->list, &r);
+				CX18_WARN("Skipped %s, buffer %d, %d "
+					  "times - it must have dropped out of "
+					  "rotation\n", s->name, buf->id,
+					  buf->skipped);
+			}
 			continue;
 		}
 
 		buf->bytesused = bytesused;
-		if (s->type != CX18_ENC_STREAM_TYPE_TS) {
-			atomic_dec(&s->q_free.buffers);
+		atomic_dec(&s->q_free.buffers);
+		if (s->type == CX18_ENC_STREAM_TYPE_TS) {
+			/*
+			 * TS doesn't use q_full, but for sweeping up lost
+			 * buffers, we want the TS to requeue the buffer just
+			 * before sending the MDL back to the firmware, so we
+			 * pull it off the list here.
+			 */
+			list_del_init(&buf->list);
+		} else {
 			atomic_inc(&s->q_full.buffers);
 			s->q_full.bytesused += buf->bytesused;
 			list_move_tail(&buf->list, &s->q_full.list);
 		}
 
-		mutex_unlock(&s->qlock);
-		return buf;
+		ret = buf;
+		break;
 	}
 	mutex_unlock(&s->qlock);
-	CX18_ERR("Cannot find buffer %d for stream %s\n", id, s->name);
-	return NULL;
+
+	/* Put lost buffers back into firmware transfer rotation */
+	while (!list_empty(&r)) {
+		buf = list_entry(r.next, struct cx18_buffer, list);
+		list_del_init(r.next);
+		cx18_enqueue(s, buf, &s->q_free);
+		cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
+			  (void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
+			  1, buf->id, s->buf_size);
+		CX18_INFO("Returning %s, buffer %d back to transfer rotation\n",
+			  s->name, buf->id);
+		/* and there was much rejoicing... */
+	}
+	return ret;
 }
 
 /* Move all buffers of a queue to q_free, while flushing the buffers */
@@ -118,7 +149,7 @@ static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
 	while (!list_empty(&q->list)) {
 		buf = list_entry(q->list.next, struct cx18_buffer, list);
 		list_move_tail(q->list.next, &s->q_free.list);
-		buf->bytesused = buf->readpos = buf->b_flags = 0;
+		buf->bytesused = buf->readpos = buf->b_flags = buf->skipped = 0;
 		atomic_inc(&s->q_free.buffers);
 	}
 	cx18_queue_init(q);