about summary refs log tree commit diff stats
path: root/drivers/media/video/cx18
diff options
context:
space:
mode:
authorAndy Walls <awalls@radix.net>2008-11-16 19:18:00 -0500
committerMauro Carvalho Chehab <mchehab@redhat.com>2008-12-30 06:38:09 -0500
commit72a4f8081af1c53a1673c173ce0fdd85c4b7d403 (patch)
tree4686617996faf5a22c89947014e4883c9d3de61d /drivers/media/video/cx18
parentf576ceefb481e5617ecfb77e3a05b3d26dbf2f92 (diff)
V4L/DVB (9723): cx18: Propagate staleness of mailbox and mdl ack data to work handler
cx18: Propagate staleness of mailbox and mdl ack data to work handler to let the work handler know that the data from the encoder may not be coherent. Allows for smarter handling of buffers in future, to deal with MDLs that fall out of rotation due to irq handler being late in collecting mailbox and mdl ack info. Signed-off-by: Andy Walls <awalls@radix.net> Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Diffstat (limited to 'drivers/media/video/cx18')
-rw-r--r--drivers/media/video/cx18/cx18-driver.h7
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c34
2 files changed, 27 insertions, 14 deletions
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 041fa660a7c2..749bbb60a292 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -245,13 +245,20 @@ struct cx18_dvb {
245struct cx18; /* forward reference */ 245struct cx18; /* forward reference */
246struct cx18_scb; /* forward reference */ 246struct cx18_scb; /* forward reference */
247 247
248
248#define CX18_MAX_MDL_ACKS 2 249#define CX18_MAX_MDL_ACKS 2
249#define CX18_MAX_EPU_WORK_ORDERS 70 /* CPU_DE_RELEASE_MDL bursts 63 commands */ 250#define CX18_MAX_EPU_WORK_ORDERS 70 /* CPU_DE_RELEASE_MDL bursts 63 commands */
250 251
252#define CX18_F_EWO_MB_STALE_UPON_RECEIPT 0x1
253#define CX18_F_EWO_MB_STALE_WHILE_PROC 0x2
254#define CX18_F_EWO_MB_STALE \
255 (CX18_F_EWO_MB_STALE_UPON_RECEIPT | CX18_F_EWO_MB_STALE_WHILE_PROC)
256
251struct cx18_epu_work_order { 257struct cx18_epu_work_order {
252 struct work_struct work; 258 struct work_struct work;
253 atomic_t pending; 259 atomic_t pending;
254 struct cx18 *cx; 260 struct cx18 *cx;
261 unsigned long flags;
255 int rpu; 262 int rpu;
256 struct cx18_mailbox mb; 263 struct cx18_mailbox mb;
257 struct cx18_mdl_ack mdl_ack[CX18_MAX_MDL_ACKS]; 264 struct cx18_mdl_ack mdl_ack[CX18_MAX_MDL_ACKS];
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index d49c7c27c18f..844a62de6535 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -233,7 +233,7 @@ void cx18_epu_work_handler(struct work_struct *work)
233 * Functions that run in an interrupt handling context 233 * Functions that run in an interrupt handling context
234 */ 234 */
235 235
236static void mb_ack_irq(struct cx18 *cx, const struct cx18_epu_work_order *order) 236static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
237{ 237{
238 struct cx18_mailbox __iomem *ack_mb; 238 struct cx18_mailbox __iomem *ack_mb;
239 u32 ack_irq, req; 239 u32 ack_irq, req;
@@ -256,15 +256,20 @@ static void mb_ack_irq(struct cx18 *cx, const struct cx18_epu_work_order *order)
256 req = order->mb.request; 256 req = order->mb.request;
257 /* Don't ack if the RPU has gotten impatient and timed us out */ 257 /* Don't ack if the RPU has gotten impatient and timed us out */
258 if (req != cx18_readl(cx, &ack_mb->request) || 258 if (req != cx18_readl(cx, &ack_mb->request) ||
259 req == cx18_readl(cx, &ack_mb->ack)) 259 req == cx18_readl(cx, &ack_mb->ack)) {
260 CX18_WARN("Possibly falling behind: %s self-ack'ed our incoming"
261 " %s to EPU mailbox (sequence no. %u) while "
262 "processing\n",
263 rpu_str[order->rpu], rpu_str[order->rpu], req);
264 order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
260 return; 265 return;
266 }
261 cx18_writel(cx, req, &ack_mb->ack); 267 cx18_writel(cx, req, &ack_mb->ack);
262 cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq); 268 cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
263 return; 269 return;
264} 270}
265 271
266static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order, 272static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
267 int stale)
268{ 273{
269 u32 handle, mdl_ack_offset, mdl_ack_count; 274 u32 handle, mdl_ack_offset, mdl_ack_count;
270 struct cx18_mailbox *mb; 275 struct cx18_mailbox *mb;
@@ -276,20 +281,21 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order,
276 281
277 if (handle == CX18_INVALID_TASK_HANDLE || 282 if (handle == CX18_INVALID_TASK_HANDLE ||
278 mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) { 283 mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
279 if (!stale) 284 if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
280 mb_ack_irq(cx, order); 285 mb_ack_irq(cx, order);
281 return -1; 286 return -1;
282 } 287 }
283 288
284 cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset, 289 cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
285 sizeof(struct cx18_mdl_ack) * mdl_ack_count); 290 sizeof(struct cx18_mdl_ack) * mdl_ack_count);
286 if (!stale) 291
292 if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
287 mb_ack_irq(cx, order); 293 mb_ack_irq(cx, order);
288 return 1; 294 return 1;
289} 295}
290 296
291static 297static
292int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale) 298int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
293{ 299{
294 u32 str_offset; 300 u32 str_offset;
295 char *str = order->str; 301 char *str = order->str;
@@ -303,14 +309,14 @@ int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale)
303 cx18_setup_page(cx, SCB_OFFSET); 309 cx18_setup_page(cx, SCB_OFFSET);
304 } 310 }
305 311
306 if (!stale) 312 if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
307 mb_ack_irq(cx, order); 313 mb_ack_irq(cx, order);
308 314
309 return str_offset ? 1 : 0; 315 return str_offset ? 1 : 0;
310} 316}
311 317
312static inline 318static inline
313int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale) 319int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
314{ 320{
315 int ret = -1; 321 int ret = -1;
316 322
@@ -319,10 +325,10 @@ int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale)
319 { 325 {
320 switch (order->mb.cmd) { 326 switch (order->mb.cmd) {
321 case CX18_EPU_DMA_DONE: 327 case CX18_EPU_DMA_DONE:
322 ret = epu_dma_done_irq(cx, order, stale); 328 ret = epu_dma_done_irq(cx, order);
323 break; 329 break;
324 case CX18_EPU_DEBUG: 330 case CX18_EPU_DEBUG:
325 ret = epu_debug_irq(cx, order, stale); 331 ret = epu_debug_irq(cx, order);
326 break; 332 break;
327 default: 333 default:
328 CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n", 334 CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
@@ -370,7 +376,6 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
370 struct cx18_mailbox __iomem *mb; 376 struct cx18_mailbox __iomem *mb;
371 struct cx18_mailbox *order_mb; 377 struct cx18_mailbox *order_mb;
372 struct cx18_epu_work_order *order; 378 struct cx18_epu_work_order *order;
373 int stale = 0;
374 int submit; 379 int submit;
375 380
376 switch (rpu) { 381 switch (rpu) {
@@ -391,6 +396,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
391 return; 396 return;
392 } 397 }
393 398
399 order->flags = 0;
394 order->rpu = rpu; 400 order->rpu = rpu;
395 order_mb = &order->mb; 401 order_mb = &order->mb;
396 cx18_memcpy_fromio(cx, order_mb, mb, sizeof(struct cx18_mailbox)); 402 cx18_memcpy_fromio(cx, order_mb, mb, sizeof(struct cx18_mailbox));
@@ -400,14 +406,14 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
400 " %s to EPU mailbox (sequence no. %u)\n", 406 " %s to EPU mailbox (sequence no. %u)\n",
401 rpu_str[rpu], rpu_str[rpu], order_mb->request); 407 rpu_str[rpu], rpu_str[rpu], order_mb->request);
402 dump_mb(cx, order_mb, "incoming"); 408 dump_mb(cx, order_mb, "incoming");
403 stale = 1; 409 order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
404 } 410 }
405 411
406 /* 412 /*
407 * Individual EPU command processing is responsible for ack-ing 413 * Individual EPU command processing is responsible for ack-ing
408 * a non-stale mailbox as soon as possible 414 * a non-stale mailbox as soon as possible
409 */ 415 */
410 submit = epu_cmd_irq(cx, order, stale); 416 submit = epu_cmd_irq(cx, order);
411 if (submit > 0) { 417 if (submit > 0) {
412 queue_work(cx18_work_queue, &order->work); 418 queue_work(cx18_work_queue, &order->work);
413 } 419 }